#include <xen/domain_page.h>
#include <xen/hypercall.h>
#include <xen/perfc.h>
+#include <xen/xenoprof.h>
#include <asm/current.h>
#include <asm/io.h>
#include <asm/regs.h>
{
vmx_destroy_vmcs(v);
vpmu_destroy(v);
+ passive_domain_destroy(v);
}
#ifdef __x86_64__
default:
if ( vpmu_do_rdmsr(regs) )
goto done;
+ if ( passive_domain_do_rdmsr(regs) )
+ goto done;
switch ( long_mode_do_msr_read(regs) )
{
case HNDL_unhandled:
default:
if ( vpmu_do_wrmsr(regs) )
return X86EMUL_OKAY;
+ if ( passive_domain_do_wrmsr(regs) )
+ return X86EMUL_OKAY;
if ( wrmsr_viridian_regs(ecx, regs->eax, regs->edx) )
break;
#include <asm/hvm/vmx/vpmu.h>
#include <asm/hvm/vmx/vpmu_core2.h>
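+/* Core 2 Non-architectural Performance Counter MSRs. */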
+u32 core2_counters_msr[] = {
+ MSR_CORE_PERF_FIXED_CTR0,
+ MSR_CORE_PERF_FIXED_CTR1,
+ MSR_CORE_PERF_FIXED_CTR2};
+
+/* Core 2 Non-architectural Performance Control MSRs. */
+u32 core2_ctrls_msr[] = {
+ MSR_CORE_PERF_FIXED_CTR_CTRL,
+ MSR_IA32_PEBS_ENABLE,
+ MSR_IA32_DS_AREA};
+
+struct pmumsr core2_counters = {
+ 3,
+ core2_counters_msr
+};
+
+struct pmumsr core2_ctrls = {
+ 3,
+ core2_ctrls_msr
+};
static int arch_pmc_cnt;
static int core2_get_pmc_count(void)
static char *cpu_type;
extern int is_active(struct domain *d);
+extern int is_passive(struct domain *d);
+
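+/*
+ * Intercept a guest rdmsr of an architectural PMU MSR and satisfy it from
+ * the vcpu's shadow copy, allocating the shadow state on first use.
+ * Returns 1 if the access was handled here, 0 to let the caller fall through.
+ */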
+int passive_domain_do_rdmsr(struct cpu_user_regs *regs)
+{
+ u64 msr_content;
+ int type, index;
+ struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+ if ( model->is_arch_pmu_msr == NULL )
+ return 0;
+ if ( !model->is_arch_pmu_msr((u64)regs->ecx, &type, &index) )
+ return 0;
+ if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) &&
+      !model->allocated_msr(current) )
+ return 0;
+
+ model->load_msr(current, type, index, &msr_content);
+ regs->eax = msr_content & 0xFFFFFFFF;
+ regs->edx = msr_content >> 32;
+ return 1;
+}
+
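+/*
+ * Intercept a guest wrmsr to an architectural PMU MSR and record the value
+ * in the vcpu's shadow copy.  Returns 1 if the access was handled here.
+ */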
+int passive_domain_do_wrmsr(struct cpu_user_regs *regs)
+{
+ u64 msr_content;
+ int type, index;
+ struct vpmu_struct *vpmu = vcpu_vpmu(current);
+
+ if ( model->is_arch_pmu_msr == NULL )
+ return 0;
+ if ( !model->is_arch_pmu_msr((u64)regs->ecx, &type, &index) )
+ return 0;
+
+ if ( !(vpmu->flags & PASSIVE_DOMAIN_ALLOCATED) &&
+      !model->allocated_msr(current) )
+ return 0;
+
+ msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
+ model->save_msr(current, type, index, msr_content);
+ return 1;
+}
+
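+/* Free any PMU shadow state that was allocated for this vcpu. */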
+void passive_domain_destroy(struct vcpu *v)
+{
+ struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ if ( vpmu->flags & PASSIVE_DOMAIN_ALLOCATED )
+ model->free_msr(v);
+}
static int nmi_callback(struct cpu_user_regs *regs, int cpu)
{
if ( ovf && is_active(current->domain) && !xen_mode )
send_guest_vcpu_virq(current, VIRQ_XENOPROF);
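+ /* A passively profiled guest's counter overflowed: reflect it as an NMI. */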
+ if ( ovf == 2 )
+ test_and_set_bool(current->nmi_pending);
return 1;
}
#include <xen/sched.h>
#include <asm/regs.h>
#include <asm/current.h>
+#include <asm/hvm/vmx/vpmu.h>
+#include <asm/hvm/vmx/vpmu_core2.h>
#include "op_x86_model.h"
#include "op_counter.h"
#define CTRL_SET_KERN(val,k) (val |= ((k & 1) << 17))
#define CTRL_SET_UM(val, m) (val |= (m << 8))
#define CTRL_SET_EVENT(val, e) (val |= e)
-
+/* PERFEVTSEL bits: 22 = counter enable (EN), 20 = APIC interrupt enable (INT). */
+#define IS_ACTIVE(val) ((val) & (1 << 22))
+#define IS_ENABLE(val) ((val) & (1 << 20))
static unsigned long reset_value[NUM_COUNTERS];
int ppro_has_global_ctrl = 0;
+extern int is_passive(struct domain *d);
static void ppro_fill_in_addresses(struct op_msrs * const msrs)
{
int ovf = 0;
unsigned long eip = regs->eip;
int mode = xenoprofile_get_mode(current, regs);
+ struct arch_msr_pair *msrs_content = vcpu_vpmu(current)->context;
for (i = 0 ; i < NUM_COUNTERS; ++i) {
if (!reset_value[i])
if (CTR_OVERFLOWED(low)) {
xenoprof_log_event(current, regs, eip, mode, i);
CTR_WRITE(reset_value[i], msrs, i);
- ovf = 1;
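+ /*
+  * For a passively profiled guest (sample not taken in Xen context), save
+  * the overflowed count into the vcpu's shadow state and, if the guest had
+  * the counter's interrupt enabled, set ovf to 2 so the overflow is
+  * forwarded to the guest as an NMI.
+  */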
+ if ( is_passive(current->domain) && (mode != 2) &&
+ (vcpu_vpmu(current)->flags & PASSIVE_DOMAIN_ALLOCATED) )
+ {
+ if ( IS_ACTIVE(msrs_content[i].control) )
+ {
+ msrs_content[i].counter = (low | (u64)high << 32);
+ if ( IS_ENABLE(msrs_content[i].control) )
+ ovf = 2;
+ }
+ }
+ if ( !ovf )
+ ovf = 1;
}
}
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}
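+/*
+ * Classify msr_index as an architectural counter or event-select (control)
+ * MSR and return its zero-based index; returns 0 if it is neither.
+ */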
+static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
+{
+ if ( (msr_index >= MSR_IA32_PERFCTR0) &&
+ (msr_index < (MSR_IA32_PERFCTR0 + NUM_COUNTERS)) )
+ {
+ *type = MSR_TYPE_ARCH_COUNTER;
+ *index = msr_index - MSR_IA32_PERFCTR0;
+ return 1;
+ }
+ if ( (msr_index >= MSR_P6_EVNTSEL0) &&
+ (msr_index < (MSR_P6_EVNTSEL0 + NUM_CONTROLS)) )
+ {
+ *type = MSR_TYPE_ARCH_CTRL;
+ *index = msr_index - MSR_P6_EVNTSEL0;
+ return 1;
+ }
+
+ return 0;
+}
+
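+/* Allocate the per-vcpu shadow copies of the architectural counter pairs. */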
+static int ppro_allocate_msr(struct vcpu *v)
+{
+ struct vpmu_struct *vpmu = vcpu_vpmu(v);
+ struct arch_msr_pair *msr_content;
+
+ msr_content = xmalloc_bytes(sizeof(struct arch_msr_pair) * NUM_COUNTERS);
+ if ( !msr_content )
+ goto out;
+ memset(msr_content, 0, sizeof(struct arch_msr_pair) * NUM_COUNTERS);
+ vpmu->context = msr_content;
+ vpmu->flags = PASSIVE_DOMAIN_ALLOCATED;
+ return 1;
+out:
+ gdprintk(XENLOG_WARNING, "Insufficient memory for oprofile; oprofile is "
+ "unavailable on domain %d vcpu %d.\n",
+ v->domain->domain_id, v->vcpu_id);
+ return 0;
+}
+
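+/* Free the shadow state allocated by ppro_allocate_msr(). */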
+static void ppro_free_msr(struct vcpu *v)
+{
+ struct vpmu_struct *vpmu = vcpu_vpmu(v);
+
+ xfree(vpmu->context);
+ vpmu->flags &= ~PASSIVE_DOMAIN_ALLOCATED;
+}
+
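+/* Read a counter or control value back from the vcpu's shadow state. */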
+static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
+{
+ struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
+ switch ( type )
+ {
+ case MSR_TYPE_ARCH_COUNTER:
+ *msr_content = msrs[index].counter;
+ break;
+ case MSR_TYPE_ARCH_CTRL:
+ *msr_content = msrs[index].control;
+ break;
+ }
+}
+
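+/* Record a guest write to a counter or control value in the shadow state. */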
+static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content)
+{
+ struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
+
+ switch ( type )
+ {
+ case MSR_TYPE_ARCH_COUNTER:
+ msrs[index].counter = msr_content;
+ break;
+ case MSR_TYPE_ARCH_CTRL:
+ msrs[index].control = msr_content;
+ break;
+ }
+}
struct op_x86_model_spec const op_ppro_spec = {
.num_counters = NUM_COUNTERS,
.setup_ctrs = &ppro_setup_ctrs,
.check_ctrs = &ppro_check_ctrs,
.start = &ppro_start,
- .stop = &ppro_stop
+ .stop = &ppro_stop,
+ .is_arch_pmu_msr = &ppro_is_arch_pmu_msr,
+ .allocated_msr = &ppro_allocate_msr,
+ .free_msr = &ppro_free_msr,
+ .load_msr = &ppro_load_msr,
+ .save_msr = &ppro_save_msr
};
struct cpu_user_regs * const regs);
void (*start)(struct op_msrs const * const msrs);
void (*stop)(struct op_msrs const * const msrs);
+ int (*is_arch_pmu_msr)(u64 msr_index, int *type, int *index);
+ int (*allocated_msr)(struct vcpu *v);
+ void (*free_msr)(struct vcpu *v);
+ void (*load_msr)(struct vcpu * const v, int type, int index, u64 *msr_content);
+ void (*save_msr)(struct vcpu * const v, int type, int index, u64 msr_content);
};
extern struct op_x86_model_spec const op_ppro_spec;
return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_ACTIVE));
}
-static int is_passive(struct domain *d)
+int is_passive(struct domain *d)
{
struct xenoprof *x = d->xenoprof;
return ((x != NULL) && (x->domain_type == XENOPROF_DOMAIN_PASSIVE));
#define VPMU_CONTEXT_ALLOCATED 0x1
#define VPMU_CONTEXT_LOADED 0x2
#define VPMU_RUNNING 0x4
-
+#define PASSIVE_DOMAIN_ALLOCATED 0x8
int vpmu_do_wrmsr(struct cpu_user_regs *regs);
int vpmu_do_rdmsr(struct cpu_user_regs *regs);
int vpmu_do_interrupt(struct cpu_user_regs *regs);
#ifndef __ASM_X86_HVM_VPMU_CORE_H_
#define __ASM_X86_HVM_VPMU_CORE_H_
-/* Core 2 Non-architectual Performance Counter MSRs. */
-u32 core2_counters_msr[] = {
- MSR_CORE_PERF_FIXED_CTR0,
- MSR_CORE_PERF_FIXED_CTR1,
- MSR_CORE_PERF_FIXED_CTR2};
-
-/* Core 2 Non-architectual Performance Control MSRs. */
-u32 core2_ctrls_msr[] = {
- MSR_CORE_PERF_FIXED_CTR_CTRL,
- MSR_IA32_PEBS_ENABLE,
- MSR_IA32_DS_AREA};
-
-struct pmumsr core2_counters = {
- 3,
- core2_counters_msr
-};
-
-struct pmumsr core2_ctrls = {
- 3,
- core2_ctrls_msr
-};
-
struct arch_msr_pair {
u64 counter;
u64 control;
int acquire_pmu_ownship(int pmu_ownership);
void release_pmu_ownship(int pmu_ownership);
+int passive_domain_do_rdmsr(struct cpu_user_regs *regs);
+int passive_domain_do_wrmsr(struct cpu_user_regs *regs);
+void passive_domain_destroy(struct vcpu *v);
#endif /* __XEN__XENOPROF_H__ */